var runtime.work

236 uses

	runtime (current package)
		metrics.go#L674: 	a.cpuStats = work.cpuStats
		mgc.go#L196: 	work.startSema = 1
		mgc.go#L197: 	work.markDoneSema = 1
		mgc.go#L198: 	lockInit(&work.sweepWaiters.lock, lockRankSweepWaiters)
		mgc.go#L199: 	lockInit(&work.assistQueue.lock, lockRankAssistQueue)
		mgc.go#L200: 	lockInit(&work.strongFromWeak.lock, lockRankStrongFromWeakQueue)
		mgc.go#L201: 	lockInit(&work.wbufSpans.lock, lockRankWbufSpans)
		mgc.go#L323: var work workType
		mgc.go#L508: 	n := work.cycles.Load()
		mgc.go#L523: 	for work.cycles.Load() == n+1 && sweepone() != ^uintptr(0) {
		mgc.go#L538: 	for work.cycles.Load() == n+1 && !isSweepDone() {
		mgc.go#L546: 	cycle := work.cycles.Load()
		mgc.go#L558: 		lock(&work.sweepWaiters.lock)
		mgc.go#L559: 		nMarks := work.cycles.Load()
		mgc.go#L566: 			unlock(&work.sweepWaiters.lock)
		mgc.go#L572: 		work.sweepWaiters.list.push(getg())
		mgc.go#L573: 		goparkunlock(&work.sweepWaiters.lock, waitReasonWaitForGCCycle, traceBlockUntilGCEnds, 1)
		mgc.go#L632: 		return int32(t.n-work.cycles.Load()) > 0
		mgc.go#L682: 	semacquire(&work.startSema)
		mgc.go#L685: 		semrelease(&work.startSema)
		mgc.go#L706: 	work.userForced = trigger.kind == gcTriggerCycle
		mgc.go#L730: 	work.stwprocs, work.maxprocs = gomaxprocs, gomaxprocs
		mgc.go#L731: 	if work.stwprocs > numCPUStartup {
		mgc.go#L734: 		work.stwprocs = numCPUStartup
		mgc.go#L736: 	work.heap0 = gcController.heapLive.Load()
		mgc.go#L737: 	work.pauseNS = 0
		mgc.go#L738: 	work.mode = mode
		mgc.go#L741: 	work.tSweepTerm = now
		mgc.go#L748: 	work.cpuStats.accumulateGCPauseTime(stw.stoppingCPUTime, 1)
		mgc.go#L759: 	work.cycles.Add(1)
		mgc.go#L817: 	work.cpuStats.accumulateGCPauseTime(nanotime()-stw.finishedStopping, work.maxprocs)
		mgc.go#L822: 		work.pauseNS += now - stw.startedStopping
		mgc.go#L823: 		work.tMark = now
		mgc.go#L842: 	semrelease(&work.startSema)
		mgc.go#L890: 	semacquire(&work.markDoneSema)
		mgc.go#L899: 	if !(gcphase == _GCmark && work.nwait == work.nproc && !gcMarkWorkAvailable(nil)) {
		mgc.go#L900: 		semrelease(&work.markDoneSema)
		mgc.go#L910: 	work.strongFromWeak.block = true
		mgc.go#L951: 	work.tMarkTerm = now
		mgc.go#L962: 	work.cpuStats.accumulateGCPauseTime(stw.stoppingCPUTime, 1)
		mgc.go#L989: 			work.cpuStats.accumulateGCPauseTime(nanotime()-stw.finishedStopping, work.maxprocs)
		mgc.go#L993: 			work.pauseNS += now - stw.startedStopping
		mgc.go#L1014: 	work.strongFromWeak.block = false
		mgc.go#L1020: 	semrelease(&work.markDoneSema)
		mgc.go#L1029: 	gcController.endCycle(now, int(gomaxprocs), work.userForced)
		mgc.go#L1041: 	work.heap1 = gcController.heapLive.Load()
		mgc.go#L1071: 		work.heap2 = work.bytesMarked
		mgc.go#L1081: 		stwSwept = gcSweep(work.mode)
		mgc.go#L1111: 	work.pauseNS += now - stw.startedStopping
		mgc.go#L1112: 	work.tEnd = now
		mgc.go#L1115: 	memstats.pause_ns[memstats.numgc%uint32(len(memstats.pause_ns))] = uint64(work.pauseNS)
		mgc.go#L1117: 	memstats.pause_total_ns += uint64(work.pauseNS)
		mgc.go#L1127: 	work.cpuStats.accumulateGCPauseTime(now-stw.finishedStopping, work.maxprocs)
		mgc.go#L1128: 	work.cpuStats.accumulate(now, true)
		mgc.go#L1132: 	memstats.gc_cpu_fraction = float64(work.cpuStats.GCTotalTime-work.cpuStats.GCIdleTime) / float64(work.cpuStats.TotalTime)
		mgc.go#L1144: 	if work.userForced {
		mgc.go#L1149: 	lock(&work.sweepWaiters.lock)
		mgc.go#L1151: 	injectglist(&work.sweepWaiters.list)
		mgc.go#L1152: 	unlock(&work.sweepWaiters.lock)
		mgc.go#L1260: 			" @", string(itoaDiv(sbuf[:], uint64(work.tSweepTerm-runtimeInitTime)/1e6, 3)), "s ",
		mgc.go#L1262: 		prev := work.tSweepTerm
		mgc.go#L1263: 		for i, ns := range []int64{work.tMark, work.tMarkTerm, work.tEnd} {
		mgc.go#L1272: 			int64(work.stwprocs) * (work.tMark - work.tSweepTerm),
		mgc.go#L1276: 			int64(work.stwprocs) * (work.tEnd - work.tMarkTerm),
		mgc.go#L1287: 			work.heap0>>20, "->", work.heap1>>20, "->", work.heap2>>20, " MB, ",
		mgc.go#L1291: 			work.maxprocs, " P")
		mgc.go#L1292: 		if work.userForced {
		mgc.go#L1403: 	work.nproc = ^uint32(0)
		mgc.go#L1404: 	work.nwait = ^uint32(0)
		mgc.go#L1517: 		decnwait := atomic.Xadd(&work.nwait, -1)
		mgc.go#L1518: 		if decnwait == work.nproc {
		mgc.go#L1519: 			println("runtime: work.nwait=", decnwait, "work.nproc=", work.nproc)
		mgc.go#L1578: 		incnwait := atomic.Xadd(&work.nwait, +1)
		mgc.go#L1579: 		if incnwait > work.nproc {
		mgc.go#L1581: 				"work.nwait=", incnwait, "work.nproc=", work.nproc)
		mgc.go#L1593: 		if incnwait == work.nproc && !gcMarkWorkAvailable(nil) {
		mgc.go#L1612: 	if !work.full.empty() || !work.spanq.empty() {
		mgc.go#L1615: 	if work.markrootNext < work.markrootJobs {
		mgc.go#L1628: 	work.tstart = startTime
		mgc.go#L1631: 	if work.full != 0 || work.markrootNext < work.markrootJobs || !work.spanq.empty() {
		mgc.go#L1632: 		print("runtime: full=", hex(work.full), " next=", work.markrootNext, " jobs=", work.markrootJobs, " nDataRoots=", work.nDataRoots, " nBSSRoots=", work.nBSSRoots, " nSpanRoots=", work.nSpanRoots, " nStackRoots=", work.nStackRoots, " spanq.n=", work.spanq.size(), "\n")
		mgc.go#L1645: 	work.stackRoots = nil
		mgc.go#L1706: 	gcController.resetLive(work.bytesMarked)
		mgc.go#L1798: 	work.bytesMarked = 0
		mgc.go#L1799: 	work.initialHeapLive = gcController.heapLive.Load()
		mgcmark.go#L68: 	work.nDataRoots = 0
		mgcmark.go#L69: 	work.nBSSRoots = 0
		mgcmark.go#L74: 		if nDataRoots > work.nDataRoots {
		mgcmark.go#L75: 			work.nDataRoots = nDataRoots
		mgcmark.go#L79: 		if nBSSRoots > work.nBSSRoots {
		mgcmark.go#L80: 			work.nBSSRoots = nBSSRoots
		mgcmark.go#L97: 	work.nSpanRoots = len(mheap_.markArenas) * (pagesPerArena / pagesPerSpanRoot)
		mgcmark.go#L105: 	work.stackRoots = allGsSnapshot()
		mgcmark.go#L106: 	work.nStackRoots = len(work.stackRoots)
		mgcmark.go#L108: 	work.markrootNext = 0
		mgcmark.go#L109: 	work.markrootJobs = uint32(fixedRootCount + work.nDataRoots + work.nBSSRoots + work.nSpanRoots + work.nStackRoots)
		mgcmark.go#L112: 	work.baseData = uint32(fixedRootCount)
		mgcmark.go#L113: 	work.baseBSS = work.baseData + uint32(work.nDataRoots)
		mgcmark.go#L114: 	work.baseSpans = work.baseBSS + uint32(work.nBSSRoots)
		mgcmark.go#L115: 	work.baseStacks = work.baseSpans + uint32(work.nSpanRoots)
		mgcmark.go#L116: 	work.baseEnd = work.baseStacks + uint32(work.nStackRoots)
		mgcmark.go#L122: 	if work.markrootNext < work.markrootJobs {
		mgcmark.go#L123: 		print(work.markrootNext, " of ", work.markrootJobs, " markroot jobs done\n")
		mgcmark.go#L134: 		if i >= work.nStackRoots {
		mgcmark.go#L168: 	case work.baseData <= i && i < work.baseBSS:
		mgcmark.go#L171: 			workDone += markrootBlock(datap.data, datap.edata-datap.data, datap.gcdatamask.bytedata, gcw, int(i-work.baseData))
		mgcmark.go#L174: 	case work.baseBSS <= i && i < work.baseSpans:
		mgcmark.go#L177: 			workDone += markrootBlock(datap.bss, datap.ebss-datap.bss, datap.gcbssmask.bytedata, gcw, int(i-work.baseBSS))
		mgcmark.go#L199: 	case work.baseSpans <= i && i < work.baseStacks:
		mgcmark.go#L201: 		markrootSpans(gcw, int(i-work.baseSpans))
		mgcmark.go#L206: 		if i < work.baseStacks || work.baseEnd <= i {
		mgcmark.go#L208: 			print("runtime: markroot index ", i, " not in stack roots range [", work.baseStacks, ", ", work.baseEnd, ")\n")
		mgcmark.go#L211: 		gp := work.stackRoots[i-work.baseStacks]
		mgcmark.go#L217: 			gp.waitsince = work.tstart
		mgcmark.go#L678: 	decnwait := atomic.Xadd(&work.nwait, -1)
		mgcmark.go#L679: 	if decnwait == work.nproc {
		mgcmark.go#L680: 		println("runtime: work.nwait =", decnwait, "work.nproc=", work.nproc)
		mgcmark.go#L705: 	incnwait := atomic.Xadd(&work.nwait, +1)
		mgcmark.go#L706: 	if incnwait > work.nproc {
		mgcmark.go#L708: 			"work.nproc=", work.nproc)
		mgcmark.go#L712: 	if incnwait == work.nproc && !gcMarkWorkAvailable(nil) {
		mgcmark.go#L737: 	lock(&work.assistQueue.lock)
		mgcmark.go#L738: 	list := work.assistQueue.q.popList()
		mgcmark.go#L740: 	unlock(&work.assistQueue.lock)
		mgcmark.go#L748: 	lock(&work.assistQueue.lock)
		mgcmark.go#L753: 		unlock(&work.assistQueue.lock)
		mgcmark.go#L758: 	oldList := work.assistQueue.q
		mgcmark.go#L759: 	work.assistQueue.q.pushBack(gp)
		mgcmark.go#L766: 		work.assistQueue.q = oldList
		mgcmark.go#L770: 		unlock(&work.assistQueue.lock)
		mgcmark.go#L774: 	goparkunlock(&work.assistQueue.lock, waitReasonGCAssistWait, traceBlockGCMarkAssist, 2)
		mgcmark.go#L789: 	if work.assistQueue.q.empty() {
		mgcmark.go#L801: 	lock(&work.assistQueue.lock)
		mgcmark.go#L802: 	for !work.assistQueue.q.empty() && scanBytes > 0 {
		mgcmark.go#L803: 		gp := work.assistQueue.q.pop()
		mgcmark.go#L825: 			work.assistQueue.q.pushBack(gp)
		mgcmark.go#L836: 	unlock(&work.assistQueue.lock)
		mgcmark.go#L1198: 	if work.markrootNext < work.markrootJobs {
		mgcmark.go#L1202: 			job := atomic.Xadd(&work.markrootNext, +1) - 1
		mgcmark.go#L1203: 			if job >= work.markrootJobs {
		mgcmark.go#L1237: 		if work.full == 0 {
		mgcmark.go#L1333: 		if work.full == 0 {
		mgcmark.go#L1349: 						if work.markrootNext < work.markrootJobs {
		mgcmark.go#L1350: 							job := atomic.Xadd(&work.markrootNext, +1) - 1
		mgcmark.go#L1351: 							if job < work.markrootJobs {
		mgcpacer.go#L463: 			work.initialHeapLive>>20, "->",
		mgcpacer.go#L1305: 		gcWaitOnMark(work.cycles.Load())
		mgcwork.go#L161: 	lockWithRankMayAcquire(&work.wbufSpans.lock, lockRankWbufSpans)
		mgcwork.go#L329: 		atomic.Xadd64(&work.bytesMarked, int64(w.bytesMarked))
		mgcwork.go#L412: 	if work.empty != 0 {
		mgcwork.go#L413: 		b = (*workbuf)(work.empty.pop())
		mgcwork.go#L420: 	lockWithRankMayAcquire(&work.wbufSpans.lock, lockRankWbufSpans)
		mgcwork.go#L425: 		if work.wbufSpans.free.first != nil {
		mgcwork.go#L426: 			lock(&work.wbufSpans.lock)
		mgcwork.go#L427: 			s = work.wbufSpans.free.first
		mgcwork.go#L429: 				work.wbufSpans.free.remove(s)
		mgcwork.go#L430: 				work.wbufSpans.busy.insert(s)
		mgcwork.go#L432: 			unlock(&work.wbufSpans.lock)
		mgcwork.go#L442: 			lock(&work.wbufSpans.lock)
		mgcwork.go#L443: 			work.wbufSpans.busy.insert(s)
		mgcwork.go#L444: 			unlock(&work.wbufSpans.lock)
		mgcwork.go#L468: 	work.empty.push(&b.node)
		mgcwork.go#L478: 	work.full.push(&b.node)
		mgcwork.go#L486: 	b := (*workbuf)(work.full.pop())
		mgcwork.go#L512: 	lock(&work.wbufSpans.lock)
		mgcwork.go#L513: 	if work.full != 0 {
		mgcwork.go#L519: 	work.empty = 0
		mgcwork.go#L520: 	work.wbufSpans.free.takeAll(&work.wbufSpans.busy)
		mgcwork.go#L521: 	unlock(&work.wbufSpans.lock)
		mgcwork.go#L528: 	lock(&work.wbufSpans.lock)
		mgcwork.go#L529: 	if gcphase != _GCoff || work.wbufSpans.free.isEmpty() {
		mgcwork.go#L530: 		unlock(&work.wbufSpans.lock)
		mgcwork.go#L536: 			span := work.wbufSpans.free.first
		mgcwork.go#L540: 			work.wbufSpans.free.remove(span)
		mgcwork.go#L544: 	more := !work.wbufSpans.free.isEmpty()
		mgcwork.go#L545: 	unlock(&work.wbufSpans.lock)
		mheap.go#L2463: 	if work.strongFromWeak.block {
		mheap.go#L2525: 	for work.strongFromWeak.block {
		mheap.go#L2526: 		lock(&work.strongFromWeak.lock)
		mheap.go#L2530: 		work.strongFromWeak.q.pushBack(getg())
		mheap.go#L2533: 		goparkunlock(&work.strongFromWeak.lock, waitReasonGCWeakToStrongWait, traceBlockGCWeakToStrongWait, 2)
		mheap.go#L2549: 	lock(&work.strongFromWeak.lock)
		mheap.go#L2550: 	list := work.strongFromWeak.q.popList()
		mheap.go#L2552: 	unlock(&work.strongFromWeak.lock)